free_xenheap_page((unsigned long)d->mm.perdomain_pt);
}
-void arch_do_createdomain(struct exec_domain *d)
+void arch_do_createdomain(struct exec_domain *ed)
{
+ struct domain *d = ed->domain;
d->shared_info = (void *)alloc_xenheap_page();
memset(d->shared_info, 0, PAGE_SIZE);
+ ed->vcpu_info = &d->shared_info->vcpu_data[ed->eid];
d->shared_info->arch.mfn_to_pfn_start =
virt_to_phys(&machine_to_phys_mapping[0])>>PAGE_SHIFT;
- SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d->domain);
+ SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
machine_to_phys_mapping[virt_to_phys(d->shared_info) >>
PAGE_SHIFT] = INVALID_P2M_ENTRY;
- d->mm.perdomain_pt = (l1_pgentry_t *)alloc_xenheap_page();
- memset(d->mm.perdomain_pt, 0, PAGE_SIZE);
- machine_to_phys_mapping[virt_to_phys(d->mm.perdomain_pt) >>
+ ed->mm.perdomain_pt = (l1_pgentry_t *)alloc_xenheap_page();
+ memset(ed->mm.perdomain_pt, 0, PAGE_SIZE);
+ machine_to_phys_mapping[virt_to_phys(ed->mm.perdomain_pt) >>
PAGE_SHIFT] = INVALID_P2M_ENTRY;
}
}
/* Set up shared-info area. */
- update_dom_time(ed->shared_info);
- ed->shared_info->domain_time = 0;
+ update_dom_time(p->shared_info);
+ p->shared_info->domain_time = 0;
/* Mask all upcalls... */
for ( i = 0; i < MAX_VIRT_CPUS; i++ )
- ed->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
- ed->shared_info->n_vcpu = 1;
+ p->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
+ p->shared_info->n_vcpu = 1;
/* Install the new page tables. */
__cli();
si = (start_info_t *)vstartinfo_start;
memset(si, 0, PAGE_SIZE);
si->nr_pages = p->tot_pages;
- si->shared_info = virt_to_phys(ed->shared_info);
+ si->shared_info = virt_to_phys(p->shared_info);
si->flags = SIF_PRIVILEGED | SIF_INITDOMAIN;
si->pt_base = vpt_start;
si->nr_pt_frames = nr_pt_pages;
irq_desc_t *desc;
unsigned int i, j, pirq;
u32 m;
- shared_info_t *s = d->exec_domain[0]->shared_info;
+ shared_info_t *s = d->shared_info;
for ( i = 0; i < ARRAY_SIZE(d->pirq_mask); i++ )
{
write_unlock_irq(&time_lock);
- update_dom_time(current->shared_info);
+ update_dom_time(current->domain->shared_info);
}
gtb->cs = ti->cs;
gtb->eip = ti->address;
if ( TI_GET_IF(ti) )
- ed->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
+ ed->vcpu_info->evtchn_upcall_mask = 1;
return;
xen_fault:
gtb->cs = ti->cs;
gtb->eip = ti->address;
if ( TI_GET_IF(ti) )
- ed->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
+ ed->vcpu_info->evtchn_upcall_mask = 1;
}
asmlinkage void do_double_fault(void)
gtb->cs = ti->cs;
gtb->eip = ti->address;
if ( TI_GET_IF(ti) )
- ed->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
+ ed->vcpu_info->evtchn_upcall_mask = 1;
return;
xen_fault:
gtb->cs = ti->cs;
gtb->eip = ti->address;
if ( TI_GET_IF(ti) )
- ed->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
+ ed->vcpu_info->evtchn_upcall_mask = 1;
return;
gp_in_kernel:
/* No special register assumptions */
failsafe_callback:
GET_CURRENT(%ebx)
- movl DOMAIN_processor(%ebx),%eax
+ movl EDOMAIN_processor(%ebx),%eax
shl $4,%eax
lea guest_trap_bounce(%eax),%edx
- movl DOMAIN_failsafe_addr(%ebx),%eax
+ movl EDOMAIN_failsafe_addr(%ebx),%eax
movl %eax,GTB_eip(%edx)
- movl DOMAIN_failsafe_sel(%ebx),%eax
+ movl EDOMAIN_failsafe_sel(%ebx),%eax
movw %ax,GTB_cs(%edx)
call create_bounce_frame
subl $16,%esi # add DS/ES/FS/GS to failsafe stack frame
notl %ecx
cli # tests must not race interrupts
/*test_softirqs:*/
- movl DOMAIN_processor(%ebx),%eax
+ movl EDOMAIN_processor(%ebx),%eax
shl $6,%eax # sizeof(irq_cpustat) == 64
test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
jnz process_softirqs
/*test_guest_events:*/
- movl DOMAIN_shared_info(%ebx),%eax
- testb $0xFF,SHINFO_upcall_mask(%eax)
+ movl EDOMAIN_vcpu_info(%ebx),%eax
+ testb $0xFF,VCPUINFO_upcall_mask(%eax)
jnz restore_all_guest
- testb $0xFF,SHINFO_upcall_pending(%eax)
+ testb $0xFF,VCPUINFO_upcall_pending(%eax)
jz restore_all_guest
- movb $1,SHINFO_upcall_mask(%eax) # Upcalls are masked during delivery
+ movb $1,VCPUINFO_upcall_mask(%eax) # Upcalls are masked during delivery
/*process_guest_events:*/
- movl DOMAIN_processor(%ebx),%edx
+ movl EDOMAIN_processor(%ebx),%edx
shl $4,%edx # sizeof(guest_trap_bounce) == 16
lea guest_trap_bounce(%edx),%edx
- movl DOMAIN_event_addr(%ebx),%eax
+ movl EDOMAIN_event_addr(%ebx),%eax
movl %eax,GTB_eip(%edx)
- movl DOMAIN_event_sel(%ebx),%eax
+ movl EDOMAIN_event_sel(%ebx),%eax
movw %ax,GTB_cs(%edx)
call create_bounce_frame
jmp restore_all_guest
test $2,%cl
jz 1f /* jump if returning to an existing ring-1 activation */
/* obtain ss/esp from TSS -- no current ring-1 activations */
- movl DOMAIN_processor(%ebx),%eax
+ movl EDOMAIN_processor(%ebx),%eax
/* next 4 lines multiply %eax by 8320, which is sizeof(tss_struct) */
movl %eax, %ecx
shll $7, %ecx
ALIGN
process_guest_exception_and_events:
- movl DOMAIN_processor(%ebx),%eax
+ movl EDOMAIN_processor(%ebx),%eax
shl $4,%eax
lea guest_trap_bounce(%eax),%edx
testb $~0,GTB_flags(%edx)
gtb->cs = ti->cs;
gtb->eip = ti->address;
if ( TI_GET_IF(ti) )
- d->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
+ d->vcpu_info->evtchn_upcall_mask = 1;
}
return 1;
op->u.getdomaininfo.max_pages = d->max_pages;
op->u.getdomaininfo.cpu_time = ed->cpu_time;
op->u.getdomaininfo.shared_info_frame =
- __pa(ed->shared_info) >> PAGE_SHIFT;
+ __pa(d->shared_info) >> PAGE_SHIFT;
if ( op->u.getdomaininfo.ctxt != NULL )
{
for_each_exec_domain(d, ed)
free_perdomain_pt(ed);
- free_xenheap_page((unsigned long)d->exec_domain[0]->shared_info);
+ free_xenheap_page((unsigned long)d->shared_info);
free_domain_struct(d);
}
goto out;
/* Set up the shared info structure. */
- update_dom_time(p->exec_domain[0]->shared_info);
+ update_dom_time(p->shared_info);
set_bit(DF_CONSTRUCTED, &p->d_flags);
}
}
- page = virt_to_page(d->exec_domain[0]->shared_info);
+ page = virt_to_page(d->shared_info);
printk("Shared_info@%08x: caf=%08x, taf=%08x\n",
page_to_phys(page), page->count_info,
page->u.inuse.type_info);
ed->processor,
test_bit(EDF_RUNNING, &ed->ed_flags) ? 'T':'F',
ed->ed_flags,
- ed->shared_info->vcpu_data[0].evtchn_upcall_pending,
- ed->shared_info->vcpu_data[0].evtchn_upcall_mask);
+ ed->vcpu_info->evtchn_upcall_pending,
+ ed->vcpu_info->evtchn_upcall_mask);
}
printk("Notifying guest...\n");
send_guest_virq(d->exec_domain[0], VIRQ_DEBUG);
long do_block(void)
{
ASSERT(current->id != IDLE_DOMAIN_ID);
- current->shared_info->vcpu_data[0].evtchn_upcall_mask = 0;
+ current->vcpu_info->evtchn_upcall_mask = 0;
set_bit(EDF_BLOCKED, &current->ed_flags);
TRACE_2D(TRC_SCHED_BLOCK, current->id, current);
__enter_scheduler();
/* Ensure that the domain has an up-to-date time base. */
if ( !is_idle_task(next->domain) )
- update_dom_time(next->shared_info);
+ update_dom_time(next->domain->shared_info);
if ( unlikely(prev == next) )
return;
TRACE_0D(TRC_SCHED_T_TIMER_FN);
if ( !is_idle_task(p->domain) ) {
- update_dom_time(p->shared_info);
+ update_dom_time(p->domain->shared_info);
send_guest_virq(p, VIRQ_TIMER);
}
struct domain *p = (struct domain *)data;
struct exec_domain *ed = p->exec_domain[0];
TRACE_0D(TRC_SCHED_DOM_TIMER_FN);
- update_dom_time(ed->shared_info);
+ update_dom_time(p->shared_info);
send_guest_virq(ed, VIRQ_TIMER);
}
TRACE_0D(TRC_SCHED_FALLBACK_TIMER_FN);
if ( !is_idle_task(p) )
- update_dom_time(ed->shared_info);
+ update_dom_time(p->shared_info);
fallback_timer[ed->processor].expires = NOW() + MILLISECS(500);
add_ac_timer(&fallback_timer[ed->processor]);
#define XREGS_fs 0x3C
#define XREGS_gs 0x40
-/* Offsets in 'struct domain' --- AUTO-GENERATE ME! */
-#define DOMAIN_processor 0
-#define DOMAIN_shared_info 4
-#define DOMAIN_event_sel 8
-#define DOMAIN_event_addr 12
-#define DOMAIN_failsafe_sel 16
-#define DOMAIN_failsafe_addr 20
-
-/* Offsets in shared_info_t --- AUTO-GENERATE ME! */
-#define SHINFO_upcall_pending /* 0 */
-#define SHINFO_upcall_mask 1
+/* Offsets in 'struct exec_domain' --- AUTO-GENERATE ME! */
+#define EDOMAIN_processor 0
+#define EDOMAIN_vcpu_info 4
+#define EDOMAIN_event_sel 8
+#define EDOMAIN_event_addr 12
+#define EDOMAIN_failsafe_sel 16
+#define EDOMAIN_failsafe_addr 20
+
+/* Offsets in vcpu_info_t --- AUTO-GENERATE ME! */
+#define VCPUINFO_upcall_pending /* 0 */
+#define VCPUINFO_upcall_mask 1
/* Offsets in 'struct guest_trap_bounce' --- AUTO-GENERATE ME! */
#define GTB_error_code 0
/* No support for multi-processor guests. */
#define MAX_VIRT_CPUS 4
+/*
+ * Per-VCPU information goes here. This will be cleaned up more when Xen
+ * actually supports multi-VCPU guests.
+ */
+typedef struct vcpu_info_st
+{
+ /*
+ * 'evtchn_upcall_pending' is written non-zero by Xen to indicate
+ * a pending notification for a particular VCPU. It is then cleared
+ * by the guest OS /before/ checking for pending work, thus avoiding
+ * a set-and-check race. Note that the mask is only accessed by Xen
+ * on the CPU that is currently hosting the VCPU. This means that the
+ * pending and mask flags can be updated by the guest without special
+ * synchronisation (i.e., no need for the x86 LOCK prefix).
+ * This may seem suboptimal because if the pending flag is set by
+ * a different CPU then an IPI may be scheduled even when the mask
+ * is set. However, note:
+ * 1. The task of 'interrupt holdoff' is covered by the per-event-
+ * channel mask bits. A 'noisy' event that is continually being
+ * triggered can be masked at source at this very precise
+ * granularity.
+ * 2. The main purpose of the per-VCPU mask is therefore to restrict
+ * reentrant execution: whether for concurrency control, or to
+ * prevent unbounded stack usage. Whatever the purpose, we expect
+ * that the mask will be asserted only for short periods at a time,
+ * and so the likelihood of a 'spurious' IPI is suitably small.
+ * The mask is read before making an event upcall to the guest: a
+ * non-zero mask therefore guarantees that the VCPU will not receive
+ * an upcall activation. The mask is cleared when the VCPU requests
+ * to block: this avoids wakeup-waiting races.
+ */
+ u8 evtchn_upcall_pending;
+ u8 evtchn_upcall_mask;
+ u8 pad0, pad1;
+} PACKED vcpu_info_t;
+
/*
* Xen/guestos shared data -- pointer provided in start_info.
* NB. We expect that this struct is smaller than a page.
*/
typedef struct shared_info_st
{
- /*
- * Per-VCPU information goes here. This will be cleaned up more when Xen
- * actually supports multi-VCPU guests.
- */
- struct {
- /*
- * 'evtchn_upcall_pending' is written non-zero by Xen to indicate
- * a pending notification for a particular VCPU. It is then cleared
- * by the guest OS /before/ checking for pending work, thus avoiding
- * a set-and-check race. Note that the mask is only accessed by Xen
- * on the CPU that is currently hosting the VCPU. This means that the
- * pending and mask flags can be updated by the guest without special
- * synchronisation (i.e., no need for the x86 LOCK prefix).
- * This may seem suboptimal because if the pending flag is set by
- * a different CPU then an IPI may be scheduled even when the mask
- * is set. However, note:
- * 1. The task of 'interrupt holdoff' is covered by the per-event-
- * channel mask bits. A 'noisy' event that is continually being
- * triggered can be masked at source at this very precise
- * granularity.
- * 2. The main purpose of the per-VCPU mask is therefore to restrict
- * reentrant execution: whether for concurrency control, or to
- * prevent unbounded stack usage. Whatever the purpose, we expect
- * that the mask will be asserted only for short periods at a time,
- * and so the likelihood of a 'spurious' IPI is suitably small.
- * The mask is read before making an event upcall to the guest: a
- * non-zero mask therefore guarantees that the VCPU will not receive
- * an upcall activation. The mask is cleared when the VCPU requests
- * to block: this avoids wakeup-waiting races.
- */
- u8 evtchn_upcall_pending;
- u8 evtchn_upcall_mask;
- u8 pad0, pad1;
- } PACKED vcpu_data[MAX_VIRT_CPUS]; /* 0 */
+ vcpu_info_t vcpu_data[MAX_VIRT_CPUS]; /* 0 */
u32 n_vcpu;
static inline void evtchn_set_pending(struct domain *d, int port)
{
struct exec_domain *ed = d->exec_domain[0];
- shared_info_t *s = ed->shared_info;
+ shared_info_t *s = d->shared_info;
int running;
/* These three operations must happen in strict order. */
!test_and_set_bit(port>>5, &s->evtchn_pending_sel) )
{
/* The VCPU pending flag must be set /after/ update to evtchn-pend. */
- set_bit(0, &s->vcpu_data[0].evtchn_upcall_pending);
+ set_bit(0, &ed->vcpu_info->evtchn_upcall_pending);
/*
* NB1. 'flags' and 'processor' must be checked /after/ update of
}
#define event_pending(_d) \
- ((_d)->shared_info->vcpu_data[0].evtchn_upcall_pending && \
- !(_d)->shared_info->vcpu_data[0].evtchn_upcall_mask)
+ ((_d)->vcpu_info->evtchn_upcall_pending && \
+ !(_d)->vcpu_info->evtchn_upcall_mask)
#endif /* __XEN_EVENT_H__ */
u32 processor; /* 00: current processor */
/* An unsafe pointer into a shared data area. */
- shared_info_t *shared_info; /* 04: shared data area */
+ vcpu_info_t *vcpu_info; /* 04: vcpu info pointer */
/*
* Return vectors pushed to us by guest OS.
domid_t id;
s_time_t create_time;
+ shared_info_t *shared_info; /* shared data area */
+
spinlock_t page_alloc_lock; /* protects all the following fields */
struct list_head page_list; /* linked list, of size tot_pages */
struct list_head xenpage_list; /* linked list, of size xenheap_pages */